[IA64] Merge vpsr.i with evtchn_upcall_mask to solve one tricky bug
author awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Thu, 30 Mar 2006 16:55:26 +0000 (09:55 -0700)
committer awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Thu, 30 Mar 2006 16:55:26 +0000 (09:55 -0700)
Per agreement on the summit, xen/ia64 will move to event channel
model same as xen/x86, under which event is the layer under pirq
(external interrupt), virq, and ipi with the latter three bound
to event ports. Within that model, no external interrupt will be
injected directly and evtchn_upcall_mask is the flag to control
whether events are deliverable.

So xenlinux needs to operate evtchn_upcall_mask at all places
where it originally operates vpsr.i. However these two flags are
present in different shared areas, and thus xenlinux can't ensure
an atomic update of the two flags, which leaves severe stability issues.
One severe bug arises for this reason, where some hypercalls may be
restarted infinitely while events are pending.

Actually based on description of future model, events become the
superset of external interrupts and thus evtchn_upcall_mask super-
set of vpsr.i (interrupt_delivery_enable). We can merge two flags
into one by removing the latter. By this way, we ensure correctness
and, most importantly, conform to the common code, which always
relies on evtchn_upcall_mask.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>

15 files changed:
linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c
linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S
linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S
linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S
linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S
linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h
xen/arch/ia64/asm-xsi-offsets.c
xen/arch/ia64/xen/domain.c
xen/arch/ia64/xen/hyperprivop.S
xen/arch/ia64/xen/ivt.S
xen/arch/ia64/xen/process.c
xen/arch/ia64/xen/vcpu.c
xen/arch/ia64/xen/xentime.c
xen/include/asm-ia64/domain.h
xen/include/public/arch-ia64.h

index 8d441df8acf8a9106e27b1893f8b52426dc0ec37..5c461ba53b3c5bfcbe9148c8b51b56afbdc133cf 100644 (file)
@@ -5,6 +5,7 @@
 #include <asm/sal.h>
 #include <asm/hypervisor.h>
 /* #include <asm-xen/evtchn.h> */
+#include <xen/interface/arch-ia64.h>
 #include <linux/vmalloc.h>
 
 shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)0xf100000000000000;
index 6a12ed9ec3f2d25022b080dcfc28b3771d9e8d42..2adcd3ed568afdacb2e8afa577396c61be5a864a 100644 (file)
@@ -8,6 +8,27 @@
 #include <asm/processor.h>
 #include <asm/asmmacro.h>
 
+/* To clear vpsr.ic, vpsr.i needs to be cleared first */
+#define XEN_CLEAR_PSR_IC                               \
+       mov r14=1;                                      \
+       movl r15=XSI_PSR_I_ADDR;                        \
+       movl r2=XSI_PSR_IC;                             \
+       ;;                                              \
+       ld8 r15=[r15];                                  \
+       ld4 r3=[r2];                                    \
+       ;;                                              \
+       ld1 r16=[r15];                                  \
+       ;;                                              \
+       st1 [r15]=r14;                                  \
+       st4 [r2]=r0;                                    \
+       ;;
+
+/* First restore vpsr.ic, and then vpsr.i */
+#define XEN_RESTORE_PSR_IC                             \
+       st4 [r2]=r3;                                    \
+       st1 [r15]=r16;                                  \
+       ;;
+
 GLOBAL_ENTRY(xen_get_ivr)
        movl r8=running_on_xen;;
        ld4 r8=[r8];;
@@ -15,15 +36,12 @@ GLOBAL_ENTRY(xen_get_ivr)
 (p7)   mov r8=cr.ivr;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_IVR
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
+       ;;
        br.ret.sptk.many rp
        ;;
 END(xen_get_ivr)
@@ -35,15 +53,12 @@ GLOBAL_ENTRY(xen_get_tpr)
 (p7)   mov r8=cr.tpr;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_TPR
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
+       ;;
        br.ret.sptk.many rp
        ;;
 END(xen_get_tpr)
@@ -55,16 +70,14 @@ GLOBAL_ENTRY(xen_set_tpr)
 (p7)   mov cr.tpr=r32;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_TPR
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
+       ;;
        br.ret.sptk.many rp
        ;;
 END(xen_set_tpr)
@@ -76,16 +89,14 @@ GLOBAL_ENTRY(xen_eoi)
 (p7)   mov cr.eoi=r0;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_EOI
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
+       ;;
        br.ret.sptk.many rp
        ;;
 END(xen_eoi)
@@ -97,16 +108,13 @@ GLOBAL_ENTRY(xen_thash)
 (p7)   thash r8=r32;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_THASH
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -119,16 +127,13 @@ GLOBAL_ENTRY(xen_set_itm)
 (p7)   mov cr.itm=r32;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_ITM
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -141,17 +146,14 @@ GLOBAL_ENTRY(xen_ptcga)
 (p7)   ptc.ga r32,r33;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r11=XSI_PSR_IC
        mov r8=r32
        mov r9=r33
        ;;
-       ld8 r10=[r11]
-       ;;
-       st8 [r11]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_PTC_GA
        ;;
-       st8 [r11]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -164,16 +166,13 @@ GLOBAL_ENTRY(xen_get_rr)
 (p7)   mov r8=rr[r32];;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_RR
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -186,17 +185,14 @@ GLOBAL_ENTRY(xen_set_rr)
 (p7)   mov rr[r32]=r33;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r11=XSI_PSR_IC
        mov r8=r32
        mov r9=r33
        ;;
-       ld8 r10=[r11]
-       ;;
-       st8 [r11]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_RR
        ;;
-       st8 [r11]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -241,17 +237,14 @@ GLOBAL_ENTRY(xen_set_kr)
 (p7)   mov ar7=r9
 (p7)   br.ret.sptk.many rp;;
 
-1:     movl r11=XSI_PSR_IC
-       mov r8=r32
+1:     mov r8=r32
        mov r9=r33
        ;;
-       ld8 r10=[r11]
-       ;;
-       st8 [r11]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_KR
        ;;
-       st8 [r11]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_set_rr)
@@ -263,16 +256,13 @@ GLOBAL_ENTRY(xen_fc)
 (p7)   fc r32;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_FC
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_fc)
@@ -284,16 +274,13 @@ GLOBAL_ENTRY(xen_get_cpuid)
 (p7)   mov r8=cpuid[r32];;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_CPUID
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_get_cpuid)
@@ -305,16 +292,13 @@ GLOBAL_ENTRY(xen_get_pmd)
 (p7)   mov r8=pmd[r32];;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_PMD
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_get_pmd)
@@ -327,16 +311,13 @@ GLOBAL_ENTRY(xen_get_eflag)
 (p7)   mov r8=ar24;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_EFLAG
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_get_eflag)
@@ -349,16 +330,13 @@ GLOBAL_ENTRY(xen_set_eflag)
 (p7)   mov ar24=r32
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
        mov r8=r32
        ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_EFLAG
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_set_eflag)
index 0d1ded964a98bb5e27e8d34b70439854b69b8f48..3215aee55af23804af5c0200886d4d7f0905fb0e 100644 (file)
@@ -312,9 +312,12 @@ ENTRY(ia64_leave_syscall)
        cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
 #else /* !CONFIG_PREEMPT */
 #ifdef CONFIG_XEN
-       movl r2=XSI_PSR_I
+       movl r2=XSI_PSR_I_ADDR
+       mov r18=1
        ;;
-(pUStk)        st4 [r2]=r0
+       ld8 r2=[r2]
+       ;;
+(pUStk)        st1 [r2]=r18
 #else
 (pUStk)        rsm psr.i
 #endif
@@ -345,9 +348,14 @@ ENTRY(ia64_leave_syscall)
        ;;
        invala                  // M0|1 invalidate ALAT
 #ifdef CONFIG_XEN
+       movl r28=XSI_PSR_I_ADDR
        movl r29=XSI_PSR_IC
        ;;
-       st8     [r29]=r0        // note: clears both vpsr.i and vpsr.ic!
+       ld8 r28=[r28]
+       mov r30=1
+       ;;
+       st1     [r28]=r30
+       st4     [r29]=r0        // note: clears both vpsr.i and vpsr.ic!
        ;;
 #else
        rsm psr.i | psr.ic      // M2 initiate turning off of interrupt and interruption collection
@@ -441,9 +449,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
        cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
 #else
 #ifdef CONFIG_XEN
-(pUStk)        movl r17=XSI_PSR_I
-       ;;
-(pUStk)        st4 [r17]=r0
+(pUStk)        movl r17=XSI_PSR_I_ADDR
+(pUStk)        mov r31=1
+               ;;
+(pUStk)        ld8 r17=[r17]
+               ;;
+(pUStk)        st1 [r17]=r31
        ;;
 #else
 (pUStk)        rsm psr.i
@@ -496,9 +507,14 @@ GLOBAL_ENTRY(ia64_leave_kernel)
        mov ar.ssd=r31
        ;;
 #ifdef CONFIG_XEN
+       movl r23=XSI_PSR_I_ADDR
        movl r22=XSI_PSR_IC
        ;;
-       st8 [r22]=r0            // note: clears both vpsr.i and vpsr.ic!
+       ld8 r23=[r23]
+       mov r25=1
+       ;;
+       st1 [r23]=r25
+       st4 [r22]=r0            // note: clears both vpsr.i and vpsr.ic!
        ;;
 #else
        rsm psr.i | psr.ic      // initiate turning off of interrupt and interruption collection
@@ -803,9 +819,12 @@ skip_rbs_switch:
        br.call.spnt.many rp=schedule
 .ret9: cmp.eq p6,p0=r0,r0                              // p6 <- 1
 #ifdef CONFIG_XEN
-       movl r2=XSI_PSR_I
+       movl r2=XSI_PSR_I_ADDR
+       mov r20=1
+       ;;
+       ld8 r2=[r2]
        ;;
-       st4 [r2]=r0
+       st1 [r2]=r20
 #else
        rsm psr.i               // disable interrupts
 #endif
index 793d1b4abd5f5729bbf58e5125f3ed0d124fa91a..8ec8b5e546055cac9ddec021815807bb8aa4d619 100644 (file)
@@ -683,9 +683,11 @@ ENTRY(dkey_miss)
        // Leaving this code inline above results in an IVT section overflow
        // There is no particular reason for this code to be here...
 xen_page_fault:
-(p15)  movl r3=XSI_PSR_I
+(p15)  movl r3=XSI_PSR_I_ADDR
        ;;
-(p15)  st4 [r3]=r14,XSI_PEND-XSI_PSR_I         // if (p15) vpsr.i = 1
+(p15)  ld8 r3=[r3]
+       ;;
+(p15)  st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR     // if (p15) vpsr.i = 1
        mov r14=r0
        ;;
 (p15)  ld4 r14=[r3]                            // if (pending_interrupts)
@@ -1043,9 +1045,11 @@ ENTRY(break_fault)
        mov r16=1
        ;;
 #if 1
-       st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC       // vpsr.ic = 1
+       st4 [r3]=r16,XSI_PSR_I_ADDR-XSI_PSR_IC  // vpsr.ic = 1
+       ;;
+(p15)  ld8 r3=[r3]
        ;;
-(p15)  st4 [r3]=r16,XSI_PEND-XSI_PSR_I         // if (p15) vpsr.i = 1
+(p15)  st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR     // if (p15) vpsr.i = 1
        mov r16=r0
        ;;
 (p15)  ld4 r16=[r3]                            // if (pending_interrupts)
@@ -1055,10 +1059,12 @@ ENTRY(break_fault)
 (p6)   ssm     psr.i                           //   do a real ssm psr.i
        ;;
 #else
-//     st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC       // vpsr.ic = 1
-       adds r3=XSI_PSR_I-XSI_PSR_IC,r3         // SKIP vpsr.ic = 1
+//     st4 [r3]=r16,XSI_PSR_I_ADDR-XSI_PSR_IC  // vpsr.ic = 1
+       adds r3=XSI_PSR_I_ADDR-XSI_PSR_IC,r3    // SKIP vpsr.ic = 1
+       ;;
+(p15)  ld8 r3=[r3]
        ;;
-(p15)  st4 [r3]=r16,XSI_PEND-XSI_PSR_I         // if (p15) vpsr.i = 1
+(p15)  st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR     // if (p15) vpsr.i = 1
        mov r16=r0
        ;;
 (p15)  ld4 r16=[r3]                            // if (pending_interrupts)
index 2fd45e2984daf3e02cfcde18d74d6b2e667fb5e3..bfeff667dd628b8b508e6c8609913a20c14dca27 100644 (file)
@@ -43,11 +43,14 @@ GLOBAL_ENTRY(xen_pal_call_static)
        // from the idle loop so confuses privop counting
        movl r31=XSI_PSR_IC
        ;;
-(p6)   st8 [r31]=r0
+(p6)   st4 [r31]=r0
        ;;
-(p7)   adds r31=XSI_PSR_I-XSI_PSR_IC,r31
+(p7)   adds r31=XSI_PSR_I_ADDR-XSI_PSR_IC,r31
+(p7)   mov r22=1
        ;;
-(p7)   st4 [r31]=r0
+(p7)   ld8 r31=[r31]
+       ;;
+(p7)   st1 [r31]=r22
        ;;
        mov r31 = in3
        mov b7 = loc2
index af1685b4dbbabe950e1c2ac47a7e6fe87ba3558d..db737b394060120e77c94dcfcb1dce1a95282731 100644 (file)
@@ -87,9 +87,14 @@ extern void xen_set_eflag(unsigned long);    /* see xen_ia64_setreg */
  * Others, like "pend", are abstractions based on privileged registers.
  * "Pend" is guaranteed to be set if reading cr.ivr would return a
  * (non-spurious) interrupt. */
-#define xen_get_virtual_psr_i()                (*(int *)(XSI_PSR_I))
-#define xen_set_virtual_psr_i(_val)    ({ *(int *)(XSI_PSR_I) = _val ? 1:0; })
-#define xen_set_virtual_psr_ic(_val)   ({ *(int *)(XSI_PSR_IC) = _val ? 1:0; })
+#define XSI_PSR_I                      \
+       (*(uint64_t *)(XSI_PSR_I_ADDR))
+#define xen_get_virtual_psr_i()                \
+       (!(*(uint8_t *)(XSI_PSR_I)))
+#define xen_set_virtual_psr_i(_val)    \
+       ({ *(uint8_t *)(XSI_PSR_I) = (uint8_t)(_val) ? 0:1; })
+#define xen_set_virtual_psr_ic(_val)   \
+       ({ *(int *)(XSI_PSR_IC) = _val ? 1:0; })
 #define xen_get_virtual_pend()         (*(int *)(XSI_PEND))
 
 /* Hyperprivops are "break" instructions with a well-defined API.
index 72406cfb23075b5f829a4d00c2b41f73acca0759..de24bacaa62ad8ddfc48b52d913c76b5db4c2fdc 100755 (executable)
@@ -50,8 +50,8 @@ void foo(void)
        /* First is shared info page, and then arch specific vcpu context */
        DEFINE(XSI_BASE, SHAREDINFO_ADDR);
 
-       DEFINE(XSI_PSR_I_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_delivery_enabled)));
-       DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_delivery_enabled)));
+       DEFINE(XSI_PSR_I_ADDR_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_mask_addr)));
+       DEFINE(XSI_PSR_I_ADDR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_mask_addr)));
        DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
        DEFINE(XSI_IPSR_OFS, (XSI_OFS + offsetof(mapped_regs_t, ipsr)));
        DEFINE(XSI_IIP_OFS, (XSI_OFS + offsetof(mapped_regs_t, iip)));
@@ -104,5 +104,4 @@ void foo(void)
        DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0])));
        DEFINE(XSI_TMP0_OFS, (XSI_OFS + offsetof(mapped_regs_t, tmp[0])));
        DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0])));
-       
 }
index 3f201369cab1f5176d95678d0c6e840021e1c912..fd2a5692e883a2ee66582bbf2229f3dd1c3cc4f4 100644 (file)
@@ -485,6 +485,8 @@ void new_thread(struct vcpu *v,
                regs->ar_rsc |= (2 << 2); /* force PL2/3 */
                VCPU(v, banknum) = 1;
                VCPU(v, metaphysical_mode) = 1;
+               VCPU(v, interrupt_mask_addr) =
+                   (uint64_t)SHAREDINFO_ADDR + INT_ENABLE_OFFSET(v);
        }
 }
 
index 70dfeac92b6c0457f48ef4ed79bf8dd1d9463eff..179c6c757e55492b154fc49ee91e2e0d6750e52d 100644 (file)
@@ -87,7 +87,7 @@
 //     r16 == cr.isr
 //     r17 == cr.iim
 //     r18 == XSI_PSR_IC_OFS
-//     r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+//     r19 == vpsr.ic
 //     r31 == pr
 GLOBAL_ENTRY(fast_hyperprivop)
 #ifndef FAST_HYPERPRIVOPS // see beginning of file
@@ -223,7 +223,7 @@ GLOBAL_ENTRY(fast_hyperprivop)
 //     r16 == cr.isr
 //     r17 == cr.iim
 //     r18 == XSI_PSR_IC
-//     r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+//     r19 == vpsr.ic 
 //     r31 == pr
 ENTRY(hyper_ssm_i)
 #ifndef FAST_SSM_I
@@ -278,11 +278,15 @@ ENTRY(hyper_ssm_i)
        movl r27=~(IA64_PSR_BE|IA64_PSR_BN);;
        or r30=r30,r28;;
        and r30=r30,r27;;
+       mov r20=1
+       adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
        adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r22=[r22]
        st8 [r21]=r30 ;;
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
-       st8 [r18]=r0;;
+       st1 [r22]=r20;;
+       st4 [r18]=r0;;
        // cover and set shared_mem precover_ifs to cr.ifs
        // set shared_mem ifs and incomplete_regframe to 0
        cover ;;
@@ -405,9 +409,10 @@ GLOBAL_ENTRY(fast_tick_reflect)
        cmp.eq p6,p0=r16,r0;;
 (p6)   br.cond.spnt.few fast_tick_reflect_done;;
        // if guest vpsr.i is off, we're done
-       adds r21=XSI_PSR_I_OFS-XSI_PSR_IC_OFS,r18 ;;
-       ld4 r21=[r21];;
-       cmp.eq p6,p0=r21,r0
+       adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r21=[r21];;
+       ld1 r21=[r21];;
+       cmp.eq p0,p6=r21,r0
 (p6)   br.cond.spnt.few fast_tick_reflect_done;;
 
        // OK, we have a clock tick to deliver to the active domain!
@@ -445,17 +450,22 @@ GLOBAL_ENTRY(fast_tick_reflect)
        dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
        or r17=r17,r28;;
        and r17=r17,r27;;
-       ld4 r16=[r18],4;;
+       ld4 r16=[r18],XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS;;
        cmp.ne p6,p0=r16,r0;;
+       ld8 r16=[r18],XSI_PSR_IC_OFS-XSI_PSR_I_ADDR_OFS
 (p6)   dep r17=-1,r17,IA64_PSR_IC_BIT,1 ;;
-       ld4 r16=[r18],-4;;
-       cmp.ne p6,p0=r16,r0;;
+       ld1 r16=[r16];;
+       cmp.eq p6,p0=r16,r0;;
 (p6)   dep r17=-1,r17,IA64_PSR_I_BIT,1 ;;
+       mov r20=1
+       adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
        adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r22=[r22]
        st8 [r21]=r17 ;;
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
-       st8 [r18]=r0;;
+       st1 [r22]=r20;;
+       st4 [r18]=r0;;
        // cover and set shared_mem precover_ifs to cr.ifs
        // set shared_mem ifs and incomplete_regframe to 0
        cover ;;
@@ -530,7 +540,7 @@ END(fast_tick_reflect)
 //     r16 == cr.isr
 //     r17 == cr.iim
 //     r18 == XSI_PSR_IC
-//     r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+//     r19 == vpsr.ic
 //     r31 == pr
 GLOBAL_ENTRY(fast_break_reflect)
 #ifndef FAST_BREAK // see beginning of file
@@ -594,12 +604,13 @@ ENTRY(fast_reflect)
 #endif
        // save iip in shared_info (DON'T POINT TO NEXT INSTRUCTION!)
        adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
-       st8 [r21]=r29;;
+       st8 [r21]=r29,XSI_ISR_OFS-XSI_IIP_OFS;;
        // set shared_mem isr
-       adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;; 
        st8 [r21]=r16 ;;
        // set cr.ipsr
+       adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
        mov r29=r30 ;;
+       ld8 r21=[r21]
        movl r28=DELIVER_PSR_SET;;
        movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
        or r29=r29,r28;;
@@ -616,19 +627,22 @@ ENTRY(fast_reflect)
        or r30=r30,r28;;
        and r30=r30,r27;;
        // also set shared_mem ipsr.i and ipsr.ic appropriately
-       ld8 r24=[r18];;
-       extr.u r22=r24,32,32
+       ld1 r22=[r21]
+       ld4 r24=[r18];;
        cmp4.eq p6,p7=r24,r0;;
 (p6)   dep r30=0,r30,IA64_PSR_IC_BIT,1
 (p7)   dep r30=-1,r30,IA64_PSR_IC_BIT,1 ;;
-       cmp4.eq p6,p7=r22,r0;;
+       mov r24=r21
+       cmp.ne p6,p7=r22,r0;;
 (p6)   dep r30=0,r30,IA64_PSR_I_BIT,1
 (p7)   dep r30=-1,r30,IA64_PSR_I_BIT,1 ;;
+       mov r22=1
        adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
        st8 [r21]=r30 ;;
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
-       st8 [r18]=r0;;
+       st1 [r24]=r22
+       st4 [r18]=r0;;
        // cover and set shared_mem precover_ifs to cr.ifs
        // set shared_mem ifs and incomplete_regframe to 0
        cover ;;
@@ -639,8 +653,6 @@ ENTRY(fast_reflect)
        st8 [r21]=r0 ;;
        adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
        st8 [r21]=r24 ;;
-       // vpsr.i = vpsr.ic = 0 on delivery of interruption
-       st8 [r18]=r0;;
        // FIXME: need to save iipa and isr to be arch-compliant
        // set iip to go to domain IVA break instruction vector
        movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
@@ -723,7 +735,7 @@ GLOBAL_ENTRY(fast_access_reflect)
        cmp.eq p7,p0=r21,r0
 (p7)   br.spnt.few dispatch_reflection ;;
        movl r18=XSI_PSR_IC;;
-       ld8 r21=[r18];;
+       ld4 r21=[r18];;
        cmp.eq p7,p0=r0,r21
 (p7)   br.spnt.few dispatch_reflection ;;
        // set shared_mem ifa, FIXME: should we validate it?
@@ -1062,17 +1074,20 @@ just_do_rfi:
        dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
        mov cr.ifs=r20 ;;
        // ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
+       adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
        dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
        // vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
-       mov r19=r0 ;;
+       ld8 r20=[r20]
+       mov r19=1 
        extr.u r23=r21,IA64_PSR_I_BIT,1 ;;
        cmp.ne p7,p6=r23,r0 ;;
        // not done yet
-(p7)   dep r19=-1,r19,32,1
+(p7)   st1 [r20]=r0
+(p6)   st1 [r20]=r19;;
        extr.u r23=r21,IA64_PSR_IC_BIT,1 ;;
        cmp.ne p7,p6=r23,r0 ;;
-(p7)   dep r19=-1,r19,0,1 ;;
-       st8 [r18]=r19 ;;
+(p7)   st4 [r18]=r19;;
+(p6)   st4 [r18]=r0;;
        // force on psr.ic, i, dt, rt, it, bn
        movl r20=(IA64_PSR_I|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT|IA64_PSR_BN)
        ;;
@@ -1209,10 +1224,12 @@ GLOBAL_ENTRY(rfi_with_interrupt)
        extr.u r20=r21,41,2 ;;  // get v(!)psr.ri
        dep r16=r20,r16,41,2 ;; // deposit cr.isr.ei
        adds r22=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;; 
-       st8 [r22]=r16 ;;
+       st8 [r22]=r16,XSI_PSR_I_ADDR_OFS-XSI_ISR_OFS ;;
        // set cr.ipsr (make sure cpl==2!)
        mov r29=r17 ;;
        movl r28=DELIVER_PSR_SET;;
+       mov r20=1
+       ld8 r22=[r22]
        movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
        or r29=r29,r28;;
        and r29=r29,r27;;
@@ -1220,7 +1237,8 @@ GLOBAL_ENTRY(rfi_with_interrupt)
        // v.ipsr and v.iip are already set (and v.iip validated) as rfi target
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
-       st8 [r18]=r0;;
+       st1 [r22]=r20
+       st4 [r18]=r0;;
        // cover and set shared_mem precover_ifs to cr.ifs
        // set shared_mem ifs and incomplete_regframe to 0
 #if 0
index 12419e52d11000823259f145b09f338a5aadc022..e44ae232ed9089685b8e9d73cd4f756440e99fb6 100644 (file)
@@ -930,7 +930,7 @@ ENTRY(break_fault)
 #endif
        movl r18=XSI_PSR_IC
        ;;
-       ld8 r19=[r18]
+       ld4 r19=[r18]
        ;;
        cmp.eq p7,p0=r0,r17                     // is this a psuedo-cover?
 (p7)   br.spnt.many dispatch_privop_fault
index b3bf6d9a917a8429fc654afd520b45f40c5ce206..03445c2de9e0fa241d2fa461ed0958fe54105bf8 100644 (file)
@@ -206,9 +206,9 @@ void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long
 #ifdef CONFIG_SMP
 #warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
 #endif
-       regs->r31 = (unsigned long) &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
+       regs->r31 = XSI_IPSR;
 
-       PSCB(v,interrupt_delivery_enabled) = 0;
+       v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v,interrupt_collection_enabled) = 0;
 
        inc_slow_reflect_count(vector);
index 8bf3bac9af0f3fe7445c3ab6bae9ba819bdacc68..10abd30aca48ed88b9f6b203adf27ccd06004041 100644 (file)
@@ -197,7 +197,8 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
        ipsr = (struct ia64_psr *)&regs->cr_ipsr;
        imm = *(struct ia64_psr *)&imm24;
        // interrupt flag
-       if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
+       if (imm.i)
+           vcpu->vcpu_info->evtchn_upcall_mask = 1;
        if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 0;
        // interrupt collection flag
        //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
@@ -232,7 +233,7 @@ IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
 
 IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
 {
-       PSCB(vcpu,interrupt_delivery_enabled) = 1;
+       vcpu->vcpu_info->evtchn_upcall_mask = 0;
        PSCB(vcpu,interrupt_collection_enabled) = 1;
        return IA64_NO_FAULT;
 }
@@ -261,11 +262,11 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
        }
        if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
        if (imm.i) {
-               if (!PSCB(vcpu,interrupt_delivery_enabled)) {
+               if (vcpu->vcpu_info->evtchn_upcall_mask) {
 //printf("vcpu_set_psr_sm: psr.ic 0->1 ");
                        enabling_interrupts = 1;
                }
-               PSCB(vcpu,interrupt_delivery_enabled) = 1;
+               vcpu->vcpu_info->evtchn_upcall_mask = 0;
        }
        if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
        // TODO: do this faster
@@ -312,9 +313,9 @@ IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
        if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
        if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
        if (newpsr.i) {
-               if (!PSCB(vcpu,interrupt_delivery_enabled))
+               if (vcpu->vcpu_info->evtchn_upcall_mask)
                        enabling_interrupts = 1;
-               PSCB(vcpu,interrupt_delivery_enabled) = 1;
+               vcpu->vcpu_info->evtchn_upcall_mask = 0;
        }
        if (newpsr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
        if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
@@ -340,7 +341,7 @@ IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval)
 
        newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
        if (newpsr.cpl == 2) newpsr.cpl = 0;
-       if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
+       if (!vcpu->vcpu_info->evtchn_upcall_mask) newpsr.i = 1;
        else newpsr.i = 0;
        if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
        else newpsr.ic = 0;
@@ -360,7 +361,7 @@ BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
 
 BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
 {
-       return !!PSCB(vcpu,interrupt_delivery_enabled);
+       return !vcpu->vcpu_info->evtchn_upcall_mask;
 }
 
 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
@@ -373,7 +374,7 @@ UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
        psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
        psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
        psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
-       psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
+       psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
        psr.ia64_psr.bn = PSCB(vcpu,banknum);
        psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
        if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
@@ -931,7 +932,7 @@ IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT64 val)
        bits &= ~(1L << bitnum);
        *p = bits;
        /* clearing an eoi bit may unmask another pending interrupt... */
-       if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
+       if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
                // worry about this later... Linux only calls eoi
                // with interrupts disabled
                printf("Trying to EOI interrupt with interrupts enabled\n");
@@ -1186,7 +1187,6 @@ IA64FAULT vcpu_rfi(VCPU *vcpu)
 
        psr.i64 = PSCB(vcpu,ipsr);
        if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
-       if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
        int_enable = psr.ia64_psr.i;
        if (psr.ia64_psr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
        if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
@@ -1218,7 +1218,7 @@ IA64FAULT vcpu_rfi(VCPU *vcpu)
        }
        PSCB(vcpu,interrupt_collection_enabled) = 1;
        vcpu_bsw1(vcpu);
-       PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
+       vcpu->vcpu_info->evtchn_upcall_mask = !int_enable;
        return (IA64_NO_FAULT);
 }
 
index e9b29a43972971f65cc2f9e69fb4875cbefc0771..77f960af18b86543a6af0ee716cd618c8aca8c7d 100644 (file)
@@ -111,7 +111,7 @@ xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
        if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
                printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
                        regs->cr_iip /*,
-                       VCPU(current,interrupt_delivery_enabled),
+                       !current->vcpu_info->evtchn_upcall_mask,
                        VCPU(current,pending_interruption) */);
                count = 0;
        }
index 20da8cde393ffaa27c50819c672214c967f670b7..ce834eed95279819e29e35af61aa242d20f0e54f 100644 (file)
@@ -39,6 +39,9 @@ struct arch_domain {
 #define xen_vastart arch.xen_vastart
 #define xen_vaend arch.xen_vaend
 #define shared_info_va arch.shared_info_va
+#define INT_ENABLE_OFFSET(v)             \
+    (sizeof(vcpu_info_t) * (v)->vcpu_id + \
+    offsetof(vcpu_info_t, evtchn_upcall_mask))
 
 struct arch_vcpu {
 #if 1
index ef339169dd25a997b5b52c1f5a142a6d39e552be..45e857321bafe7634cfd0a3460fe282d30a438d5 100644 (file)
@@ -268,7 +268,11 @@ typedef struct {
             unsigned long precover_ifs;
             unsigned long unat;  // not sure if this is needed until NaT arch is done
             int interrupt_collection_enabled; // virtual psr.ic
-            int interrupt_delivery_enabled; // virtual psr.i
+            /* virtual interrupt deliverable flag is evtchn_upcall_mask in
+             * shared info area now. interrupt_mask_addr is the address
+             * of evtchn_upcall_mask for current vcpu
+             */
+            unsigned long interrupt_mask_addr;
             int pending_interruption;
             int incomplete_regframe; // see SDM vol2 6.8
             unsigned long reserved5_1[4];